cpupool: Check for memory allocation failure on switching schedulers
author Keir Fraser <keir@xen.org>
Sun, 6 Feb 2011 16:07:27 +0000 (16:07 +0000)
committer Keir Fraser <keir@xen.org>
Sun, 6 Feb 2011 16:07:27 +0000 (16:07 +0000)
When switching schedulers on a physical cpu due to a cpupool operation
check for a potential memory allocation failure and stop the operation
gracefully.

Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
xen/common/cpupool.c
xen/common/schedule.c
xen/include/xen/sched.h

index 72ae8a225254174030b5a42ad95f72e4edcaeb04..7fd3ad849b030b457573ec6fb1fd10dd38d2cf27 100644 (file)
@@ -202,10 +202,20 @@ static int cpupool_destroy(struct cpupool *c)
  */
 static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
 {
+    int ret;
+    struct cpupool *old;
+
     if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
         return -EBUSY;
+    old = per_cpu(cpupool, cpu);
     per_cpu(cpupool, cpu) = c;
-    schedule_cpu_switch(cpu, c);
+    ret = schedule_cpu_switch(cpu, c);
+    if ( ret )
+    {
+        per_cpu(cpupool, cpu) = old;
+        return ret;
+    }
+
     cpu_clear(cpu, cpupool_free_cpus);
     if (cpupool_moving_cpu == cpu)
     {
@@ -230,12 +240,19 @@ static long cpupool_unassign_cpu_helper(void *info)
     cpu_set(cpu, cpupool_free_cpus);
     if ( !ret )
     {
-        schedule_cpu_switch(cpu, NULL);
+        ret = schedule_cpu_switch(cpu, NULL);
+        if ( ret )
+        {
+            cpu_clear(cpu, cpupool_free_cpus);
+            goto out;
+        }
         per_cpu(cpupool, cpu) = NULL;
         cpupool_moving_cpu = -1;
         cpupool_put(cpupool_cpu_moving);
         cpupool_cpu_moving = NULL;
     }
+
+out:
     spin_unlock(&cpupool_lock);
     return ret;
 }
index dd348c952a3ca9b3f72fe731d2548433bc86f8bd..21509b6df193d50db2aca2432bd2c4629cf22e83 100644 (file)
@@ -1288,7 +1288,7 @@ void __init scheduler_init(void)
         BUG();
 }
 
-void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
+int schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 {
     unsigned long flags;
     struct vcpu *idle;
@@ -1297,11 +1297,18 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
     struct scheduler *new_ops = (c == NULL) ? &ops : c->sched;
 
     if ( old_ops == new_ops )
-        return;
+        return 0;
 
     idle = idle_vcpu[cpu];
     ppriv = SCHED_OP(new_ops, alloc_pdata, cpu);
+    if ( ppriv == NULL )
+        return -ENOMEM;
     vpriv = SCHED_OP(new_ops, alloc_vdata, idle, idle->domain->sched_priv);
+    if ( vpriv == NULL )
+    {
+        SCHED_OP(new_ops, free_pdata, ppriv, cpu);
+        return -ENOMEM;
+    }
 
     pcpu_schedule_lock_irqsave(cpu, flags);
 
@@ -1318,6 +1325,8 @@ void schedule_cpu_switch(unsigned int cpu, struct cpupool *c)
 
     SCHED_OP(old_ops, free_vdata, vpriv_old);
     SCHED_OP(old_ops, free_pdata, ppriv_old, cpu);
+
+    return 0;
 }
 
 struct scheduler *scheduler_get_default(void)
index c5def0b03fe427bc06c1a18aaf357cf3c8cd2049..ce45800dbe185f85eecbe255752b8a75863d93b6 100644 (file)
@@ -607,7 +607,7 @@ struct scheduler;
 struct scheduler *scheduler_get_default(void);
 struct scheduler *scheduler_alloc(unsigned int sched_id, int *perr);
 void scheduler_free(struct scheduler *sched);
-void schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
+int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
 void vcpu_force_reschedule(struct vcpu *v);
 int cpu_disable_scheduler(unsigned int cpu);
 int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);